#include <xen/event.h>
#include <xen/sched.h>
#include <asm/hvm/domain.h>
+#include <asm/hvm/support.h>
static void __hvm_pci_intx_assert(
struct domain *d, unsigned int device, unsigned int intx)
{
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
unsigned int gsi, link, isa_irq;
ASSERT((device <= 31) && (intx <= 3));
- if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx) )
+ if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
return;
gsi = hvm_pci_intx_gsi(device, intx);
vioapic_irq_positive_edge(d, gsi);
link = hvm_pci_intx_link(device, intx);
- isa_irq = hvm_irq->pci_link_route[link];
+ isa_irq = hvm_irq->pci_link.route[link];
if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
(hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
{
static void __hvm_pci_intx_deassert(
struct domain *d, unsigned int device, unsigned int intx)
{
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
unsigned int gsi, link, isa_irq;
ASSERT((device <= 31) && (intx <= 3));
- if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx) )
+ if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
return;
gsi = hvm_pci_intx_gsi(device, intx);
--hvm_irq->gsi_assert_count[gsi];
link = hvm_pci_intx_link(device, intx);
- isa_irq = hvm_irq->pci_link_route[link];
+ isa_irq = hvm_irq->pci_link.route[link];
if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
(--hvm_irq->gsi_assert_count[isa_irq] == 0) )
vpic_irq_negative_edge(d, isa_irq);
void hvm_isa_irq_assert(
struct domain *d, unsigned int isa_irq)
{
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
ASSERT(isa_irq <= 15);
spin_lock(&d->arch.hvm_domain.irq_lock);
- if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq) &&
+ if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
(hvm_irq->gsi_assert_count[gsi]++ == 0) )
{
vioapic_irq_positive_edge(d, gsi);
void hvm_isa_irq_deassert(
struct domain *d, unsigned int isa_irq)
{
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
ASSERT(isa_irq <= 15);
spin_lock(&d->arch.hvm_domain.irq_lock);
- if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq) &&
+ if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
(--hvm_irq->gsi_assert_count[gsi] == 0) )
vpic_irq_negative_edge(d, isa_irq);
{
struct vcpu *v = current;
struct domain *d = v->domain;
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
unsigned int gsi, pdev, pintx, asserted;
/* Fast lock-free tests. */
void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
u8 old_isa_irq;
ASSERT((link <= 3) && (isa_irq <= 15));
spin_lock(&d->arch.hvm_domain.irq_lock);
- old_isa_irq = hvm_irq->pci_link_route[link];
+ old_isa_irq = hvm_irq->pci_link.route[link];
if ( old_isa_irq == isa_irq )
goto out;
- hvm_irq->pci_link_route[link] = isa_irq;
+ hvm_irq->pci_link.route[link] = isa_irq;
if ( hvm_irq->pci_link_assert_count[link] == 0 )
goto out;
void hvm_set_callback_via(struct domain *d, uint64_t via)
{
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
unsigned int gsi=0, pdev=0, pintx=0;
uint8_t via_type;
(1 << (isa_irq & 7))) &&
domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}
+
+
+#if 0 /* Keep for debugging */
+/*
+ * Dump a domain's full virtual-IRQ state to the console: the PCI INTx
+ * wire bitmap, the ISA IRQ wire bitmap, the four PCI-ISA link routes,
+ * per-GSI and per-link assertion counts, and the callback-via setting.
+ * Compiled out by default; enable the #if while debugging save/restore.
+ * NOTE(review): callback_via is printed as .gsi regardless of
+ * callback_via_type — harmless for debug output, but the raw bytes are
+ * only meaningful as a GSI when the type is HVMIRQ_callback_gsi.
+ */
+static void irq_dump(struct domain *d)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+    int i;
+    /* The pad[] union members give whole-word views of the bitmaps. */
+    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
+           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
+           hvm_irq->pci_intx.pad[0],  hvm_irq->pci_intx.pad[1],
+           (uint32_t) hvm_irq->isa_irq.pad[0],
+           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
+           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
+    /* Eight GSI assertion counts per line. */
+    for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 )
+        printk("GSI  %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
+               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
+               hvm_irq->gsi_assert_count[i+0],
+               hvm_irq->gsi_assert_count[i+1],
+               hvm_irq->gsi_assert_count[i+2],
+               hvm_irq->gsi_assert_count[i+3],
+               hvm_irq->gsi_assert_count[i+4],
+               hvm_irq->gsi_assert_count[i+5],
+               hvm_irq->gsi_assert_count[i+6],
+               hvm_irq->gsi_assert_count[i+7]);
+    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
+           hvm_irq->pci_link_assert_count[0],
+           hvm_irq->pci_link_assert_count[1],
+           hvm_irq->pci_link_assert_count[2],
+           hvm_irq->pci_link_assert_count[3]);
+    printk("Callback via %i:0x%"PRIx32",%s asserted\n",
+           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
+           hvm_irq->callback_via_asserted ? "" : " not");
+}
+#endif
+
+
+/*
+ * Save handler for the PCI_IRQ record: writes the 128-bit PCI INTx wire
+ * bitmap (hvm_hw_pci_irqs) into the save context. The derived counters
+ * (pci_link_assert_count, gsi_assert_count) are NOT saved — the load side
+ * recomputes them from the wire state.
+ */
+static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+    /* Save PCI IRQ lines */
+    return ( hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx) );
+}
+
+
+/*
+ * Save handler for the ISA_IRQ record: writes the 16-bit ISA IRQ wire
+ * bitmap (hvm_hw_isa_irqs) into the save context.
+ */
+static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+    /* Save ISA IRQ lines */
+    return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
+}
+
+
+/*
+ * Save handler for the PCI_LINK record: writes the four-entry PCI-ISA
+ * link routing table (hvm_hw_pci_link) into the save context.
+ */
+static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+    /* Save PCI-ISA link state */
+    return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) );
+}
+
+
+/*
+ * Load handler for the PCI_IRQ record. Restores the PCI INTx wire bitmap
+ * and then rebuilds both derived counter arrays from scratch, so a
+ * malformed or stale save image cannot inject bogus assertion counts.
+ * Returns 0 on success, -EINVAL if the record is missing/short.
+ * NOTE(review): no irq_lock is taken here — presumably the domain is
+ * paused throughout restore; confirm against the restore call path.
+ */
+static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+    int link, dev, intx, gsi;
+
+    /* Load the PCI IRQ lines */
+    if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
+        return -EINVAL;
+
+    /* Clear the PCI link assert counts */
+    for ( link = 0; link < 4; link++ )
+        hvm_irq->pci_link_assert_count[link] = 0;
+
+    /* Clear the GSI link assert counts */
+    for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ )
+        hvm_irq->gsi_assert_count[gsi] = 0;
+
+    /* Recalculate the counts from the IRQ line state */
+    for ( dev = 0; dev < 32; dev++ )
+        for ( intx = 0; intx < 4; intx++ )
+            if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
+            {
+                /* Direct GSI assert (same mapping as __hvm_pci_intx_assert) */
+                gsi = hvm_pci_intx_gsi(dev, intx);
+                hvm_irq->gsi_assert_count[gsi]++;
+                /* PCI-ISA bridge assert */
+                link = hvm_pci_intx_link(dev, intx);
+                hvm_irq->pci_link_assert_count[link]++;
+            }
+
+    return 0;
+}
+
+
+/*
+ * Load handler for the ISA_IRQ record. Restores the ISA IRQ wire bitmap
+ * and bumps the GSI assertion count for each asserted line (IRQ 0 maps
+ * to GSI 2 via hvm_isa_irq_to_gsi, matching hvm_isa_irq_assert()).
+ * Returns 0 on success, -EINVAL if the record is missing/short.
+ * NOTE(review): correctness depends on irq_load_pci() having run first
+ * to zero/rebuild gsi_assert_count — presumably guaranteed by the save
+ * type codes (PCI_IRQ=7 < ISA_IRQ=8); confirm the loader's ordering.
+ */
+static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+    int irq;
+
+    /* Load the ISA IRQ lines */
+    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
+        return -EINVAL;
+
+    /* Adjust the GSI assert counts for the ISA IRQ line state.
+     * This relies on the PCI IRQ state being loaded first. */
+    for ( irq = 0; irq < 16; irq++ )
+        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
+            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;
+
+    return 0;
+}
+
+
+
+/*
+ * Load handler for the PCI_LINK record. Restores the PCI-ISA link
+ * routing table, range-checks each route, and bumps the GSI assertion
+ * count for each link that both has asserted INTx wires and is routed
+ * to a non-zero ISA IRQ (route 0 == link not routed).
+ * Returns 0 on success, -EINVAL on a missing record or a route > 15.
+ */
+static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+    int link, gsi;
+
+    /* Load the PCI-ISA IRQ link routing table */
+    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
+        return -EINVAL;
+
+    /* Sanity check: routes are ISA IRQs, so must be 0-15. */
+    for ( link = 0; link < 4; link++ )
+        if ( hvm_irq->pci_link.route[link] > 15 )
+        {
+            gdprintk(XENLOG_ERR,
+                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
+                     link, hvm_irq->pci_link.route[link]);
+            return -EINVAL;
+        }
+
+    /* Adjust the GSI assert counts for the link outputs.
+     * This relies on the PCI and ISA IRQ state being loaded first */
+    for ( link = 0; link < 4; link++ )
+    {
+        if ( hvm_irq->pci_link_assert_count[link] != 0 )
+        {
+            /* 'gsi' here is really the routed ISA IRQ, indexed directly
+             * into gsi_assert_count[] — the same convention used by the
+             * assert/deassert paths (no IRQ0->GSI2 remap for links). */
+            gsi = hvm_irq->pci_link.route[link];
+            if ( gsi != 0 )
+                hvm_irq->gsi_assert_count[gsi]++;
+        }
+    }
+
+    return 0;
+}
+
+/* Register the save/load handler pair for each of the three IRQ records. */
+HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci);
+HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa);
+HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link);
struct hvm_hw_vioapic *vioapic, unsigned int idx, int top_word, uint32_t val)
{
struct domain *d = vioapic_domain(vioapic);
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
union vioapic_redir_entry *pent, ent;
spin_lock(&d->arch.hvm_domain.irq_lock);
void vioapic_update_EOI(struct domain *d, int vector)
{
struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
union vioapic_redir_entry *ent;
int gsi;
}
}
-static void hvmirq_info(struct hvm_hw_irq *hvm_irq)
-{
- int i;
- printk("*****hvmirq state:*****\n");
- for (i = 0; i < BITS_TO_LONGS(32*4); i++)
- printk("hvmirq pci_intx[%d]:0x%lx.\n", i, hvm_irq->pci_intx[i]);
-
- for (i = 0; i < BITS_TO_LONGS(16); i++)
- printk("hvmirq isa_irq[%d]:0x%lx.\n", i, hvm_irq->isa_irq[i]);
-
- for (i = 0; i < BITS_TO_LONGS(1); i++)
- printk("hvmirq callback_irq_wire[%d]:0x%lx.\n", i, hvm_irq->callback_irq_wire[i]);
-
- printk("hvmirq callback_via_type:0x%x.\n", hvm_irq->callback_via_type);
- printk("hvmirq callback_via:0x%x.\n", hvm_irq->callback_via.gsi);
-
-
- for (i = 0; i < 4; i++)
- printk("hvmirq pci_link_route[%d]:0x%"PRIx8".\n", i, hvm_irq->pci_link_route[i]);
-
- for (i = 0; i < 4; i++)
- printk("hvmirq pci_link_assert_count[%d]:0x%"PRIx8".\n", i, hvm_irq->pci_link_assert_count[i]);
-
- for (i = 0; i < VIOAPIC_NUM_PINS; i++)
- printk("hvmirq gsi_assert_count[%d]:0x%"PRIx8".\n", i, hvm_irq->gsi_assert_count[i]);
-
- printk("hvmirq round_robin_prev_vcpu:0x%"PRIx8".\n", hvm_irq->round_robin_prev_vcpu);
-}
#else
static void ioapic_info(struct hvm_hw_vioapic *s)
{
}
-static void hvmirq_info(struct hvm_hw_irq *hvm_irq)
-{
-}
#endif
return ( hvm_save_entry(IOAPIC, 0, h, s) );
}
-static int ioapic_save_irqs(struct domain *d, hvm_domain_context_t *h)
-{
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
- hvmirq_info(hvm_irq);
-
- /* save IRQ state*/
- return ( hvm_save_entry(IRQ, 0, h, hvm_irq) );
-}
-
-
static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_hw_vioapic *s = domain_vioapic(d);
return 0;
}
-static int ioapic_load_irqs(struct domain *d, hvm_domain_context_t *h)
-{
- struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
-
- /* restore irq state */
- if ( hvm_load_entry(IRQ, h, hvm_irq) != 0 )
- return -EINVAL;
-
- hvmirq_info(hvm_irq);
- return 0;
-}
-
HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load);
-HVM_REGISTER_SAVE_RESTORE(IRQ, ioapic_save_irqs, ioapic_load_irqs);
void vioapic_init(struct domain *d)
{
/*
- * IRQ
+ * IRQs
*/
-struct hvm_hw_irq {
+struct hvm_hw_pci_irqs {
/*
* Virtual interrupt wires for a single PCI bus.
* Indexed by: device*4 + INTx#.
*/
- DECLARE_BITMAP(pci_intx, 32*4);
+ union {
+ DECLARE_BITMAP(i, 32*4);
+ uint64_t pad[2];
+ };
+};
+
+DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
+struct hvm_hw_isa_irqs {
/*
* Virtual interrupt wires for ISA devices.
* Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
*/
- DECLARE_BITMAP(isa_irq, 16);
-
- /* Virtual interrupt and via-link for paravirtual platform driver. */
- uint32_t callback_via_asserted;
union {
- enum {
- HVMIRQ_callback_none,
- HVMIRQ_callback_gsi,
- HVMIRQ_callback_pci_intx
- } callback_via_type;
- uint32_t pad; /* So the next field will be aligned */
+ DECLARE_BITMAP(i, 16);
+ uint64_t pad[1];
};
- union {
- uint32_t gsi;
- struct { uint8_t dev, intx; } pci;
- } callback_via;
+};
+
+DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);
+struct hvm_hw_pci_link {
/*
* PCI-ISA interrupt router.
* Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
* the traditional 'barber's pole' mapping ((device + INTx#) & 3).
* The router provides a programmable mapping from each link to a GSI.
*/
- u8 pci_link_route[4];
-
- /* Number of INTx wires asserting each PCI-ISA link. */
- u8 pci_link_assert_count[4];
-
- /*
- * Number of wires asserting each GSI.
- *
- * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
- * except ISA IRQ 0, which is connected to GSI 2.
- * PCI links map into this space via the PCI-ISA bridge.
- *
- * GSIs 16+ are used only be PCI devices. The mapping from PCI device to
- * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
- */
- u8 gsi_assert_count[VIOAPIC_NUM_PINS];
-
- /*
- * GSIs map onto PIC/IO-APIC in the usual way:
- * 0-7: Master 8259 PIC, IO-APIC pins 0-7
- * 8-15: Slave 8259 PIC, IO-APIC pins 8-15
- * 16+ : IO-APIC pins 16+
- */
-
- /* Last VCPU that was delivered a LowestPrio interrupt. */
- u8 round_robin_prev_vcpu;
+ u8 route[4];
};
-DECLARE_HVM_SAVE_TYPE(IRQ, 7, struct hvm_hw_irq);
+DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
/*
uint32_t speaker_data_on;
};
-DECLARE_HVM_SAVE_TYPE(PIT, 8, struct hvm_hw_pit);
+DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
/*
uint8_t cmos_index;
};
-DECLARE_HVM_SAVE_TYPE(RTC, 9, struct hvm_hw_rtc);
+DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
/*
uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
};
-DECLARE_HVM_SAVE_TYPE(HPET, 10, struct hvm_hw_hpet);
+DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
/*
* Largest type-code in use
*/
-#define HVM_SAVE_CODE_MAX 10
+#define HVM_SAVE_CODE_MAX 12
/*